In [1]:
# Silence all warnings to keep the notebook output readable.
# NOTE(review): this also hides deprecation warnings — consider narrowing the filter.
import warnings
warnings.filterwarnings('ignore')
In [2]:
# Importación librerías
import pandas as pd
import numpy as np
import keras
import tensorflow as tf

from numpy import array
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.preprocessing.sequence import TimeseriesGenerator

import os

import matplotlib.pyplot as plt
import plotly.graph_objects as go

import optuna
from optuna import Trial
from sklearn import metrics
from keras.callbacks import EarlyStopping,ReduceLROnPlateau

from sklearn.preprocessing import MinMaxScaler,StandardScaler

from pandas_datareader import data as pdr
import yfinance as yfin
# Patch pandas_datareader so get_data_yahoo fetches through yfinance.
yfin.pdr_override()
In [3]:
# Download BRK.B adjusted close prices from Yahoo Finance (2012-05-21 .. 2023-02-28)
# and keep only the adjusted close, renamed to 'BRK.B'.
data=pdr.get_data_yahoo('BRK-B','2012-05-21','2023-02-28')
data=data[['Adj Close']]
# BUG FIX: the original `data.columns=[['BRK.B']]` (a nested list) turns the
# columns into a one-level MultiIndex, so `data['BRK.B']` yields a DataFrame.
# A flat list gives a plain Index and `data['BRK.B']` is a Series, as the
# downstream shift/scale code expects.
data.columns=['BRK.B']
[*********************100%***********************]  1 of 1 completed
In [4]:
# First-difference the price series for the PACF plot; the first row of a
# diff is always NaN, so drop it positionally.
datapacf = data.diff().iloc[1:]
datapacf
Out[4]:
BRK.B
Date
2012-05-22 -0.150002
2012-05-23 0.099998
2012-05-24 0.050003
2012-05-25 -0.550003
2012-05-29 0.570000
... ...
2023-02-21 -5.519989
2023-02-22 0.440002
2023-02-23 -0.089996
2023-02-24 0.949982
2023-02-27 0.640015

2709 rows × 1 columns

In [9]:
# Partial autocorrelation of the differenced series — the number of
# significant lags motivates how many lagged prices to feed the LSTM.
import matplotlib as mpl
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_pacf

with mpl.rc_context():
    mpl.rc("figure", figsize=(10,5))
    plot_pacf(datapacf, lags=50)
In [10]:
def shift_data(df, serie_name, period):
    """Append `period` lagged copies of column `serie_name` to `df`, in place.

    The new columns are named '<serie_name>.1' … '<serie_name>.<period>';
    column k holds the series shifted down by k rows, so its first k rows
    are NaN. `df` is mutated; nothing is returned.
    """
    for lag in range(1, period + 1):
        df.insert(len(df.columns), f'{serie_name}.{lag}',
                  df[serie_name].shift(periods=lag))

# Add 10 lagged-price columns (motivated by the PACF plot), then drop the
# leading rows whose lags are NaN.
shift_data(data,'BRK.B',10)
data=data.dropna()
In [11]:
# Work on a copy so `data` keeps the original lagged frame untouched.
data_shift=data.copy()
In [12]:
# Preview: target column plus the 10 lag features.
data_shift.head()
Out[12]:
BRK.B BRK.B.1 BRK.B.2 BRK.B.3 BRK.B.4 BRK.B.5 BRK.B.6 BRK.B.7 BRK.B.8 BRK.B.9 BRK.B.10
Date
2012-06-05 79.120003 79.040001 79.019997 79.360001 78.830002 79.820000 79.250000 79.800003 79.750000 79.650002 79.800003
2012-06-06 80.680000 79.120003 79.040001 79.019997 79.360001 78.830002 79.820000 79.250000 79.800003 79.750000 79.650002
2012-06-07 80.660004 80.680000 79.120003 79.040001 79.019997 79.360001 78.830002 79.820000 79.250000 79.800003 79.750000
2012-06-08 81.360001 80.660004 80.680000 79.120003 79.040001 79.019997 79.360001 78.830002 79.820000 79.250000 79.800003
2012-06-11 80.279999 81.360001 80.660004 80.680000 79.120003 79.040001 79.019997 79.360001 78.830002 79.820000 79.250000
In [13]:
# Chronological 80/20 split: first 80% of rows is the full training set,
# the last 20% is held out for final testing.
split_percent = 0.80

# 20% for testing
splittest = int(split_percent*(len(data_shift)))

# Full training set (train + validation). The MinMax scaler is fit here and
# only `transform` is applied to the test set, so no test-set leakage.
trainall=data_shift.iloc[:splittest,:]
scalerall = MinMaxScaler()
trainall_scaled = pd.DataFrame(scalerall.fit_transform(trainall), columns=trainall.columns)
# Column 0 is the target price; columns 1.. are the 10 lag features.
xtrainall=trainall_scaled.iloc[:,1:]
ytrainall=trainall_scaled.iloc[:,0]


test=data_shift.iloc[splittest:,:]
test_scaled = pd.DataFrame(scalerall.transform(test), columns=test.columns)
xtest=test_scaled.iloc[:,1:]
ytest=test_scaled.iloc[:,0]

# Of the 80% training portion, use the last 20% for neural-network validation.
splittrain = int(split_percent*(len(data_shift[:splittest])))

# Inner training split gets its own scaler, fit on train only and applied
# (transform) to the validation set.
train=data_shift.iloc[:splittrain,:]
scalerfirst = MinMaxScaler()
train_scaled = pd.DataFrame(scalerfirst.fit_transform(train), columns=train.columns)
xtrain=train_scaled.iloc[:,1:]
ytrain=train_scaled.iloc[:,0]

val=data_shift.iloc[splittrain:splittest,:]
val_scaled=pd.DataFrame(scalerfirst.transform(val), columns=val.columns)
xval=val_scaled.iloc[:,1:]
yval=val_scaled.iloc[:,0]
In [14]:
# Row counts of each split: inner train, validation, test, full train.
for split_frame in (xtrain, xval, xtest, xtrainall):
    print(split_frame.shape[0])
1728
432
540
2160
In [15]:
# Keep the DatetimeIndex of each split for plotting later — the scaled frames
# were rebuilt without an index and therefore carry a plain RangeIndex.
traindates=train.index
testdates=test.index
valdates=val.index
traindatesall=trainall.index
In [16]:
from sklearn.neighbors import LocalOutlierFactor

# Remove outlier rows from the inner training split; LOF labels outliers -1,
# inliers +1, so the mask keeps inliers only.
lof = LocalOutlierFactor()
yhat = lof.fit_predict(xtrain)

mask = yhat != -1
xtrain_no, ytrain_no,traindates = xtrain[mask], ytrain[mask],traindates[mask]

# Repeat for the full (train + validation) training set.
lof = LocalOutlierFactor()
yhat = lof.fit_predict(xtrainall)

mask = yhat != -1
# BUG FIX: the original assigned the filtered full-train dates to `traindates`,
# clobbering the inner-split dates computed above; they belong in
# `traindatesall`, matching xtrainall_no/ytrainall_no.
xtrainall_no, ytrainall_no,traindatesall = xtrainall[mask], ytrainall[mask],traindatesall[mask]
In [17]:
# Reshape every split to the (samples, timesteps=1, features) layout the
# Keras LSTM expects; targets become (samples, 1, 1).
ytrain_n = np.reshape(ytrain_no.to_numpy(), (-1, 1, 1))
xtrain_n = np.reshape(xtrain_no.to_numpy(), (-1, 1, train.shape[1] - 1))

yval_n = np.reshape(yval.to_numpy(), (-1, 1, 1))
xval_n = np.reshape(xval.to_numpy(), (-1, 1, val.shape[1] - 1))

ytest_n = np.reshape(ytest.to_numpy(), (-1, 1, 1))
xtest_n = np.reshape(xtest.to_numpy(), (-1, 1, test.shape[1] - 1))

ytrainall_n = np.reshape(ytrainall_no.to_numpy(), (-1, 1, 1))
xtrainall_n = np.reshape(xtrainall_no.to_numpy(), (-1, 1, trainall.shape[1] - 1))
In [18]:
import random
seed = 99
def random_seed(seed):
    """Seed the Python, NumPy and TensorFlow RNGs for reproducible training."""
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)
random_seed(seed)
In [19]:
def objective(trial):
    """Optuna objective: train an LSTM with trial-sampled hyperparameters and
    return the minimum validation loss reached over 50 epochs.

    Search space: 1-4 LSTM layers; per-layer units (log-uniform, feature
    count..400), activation and dropout; final Dense activation;
    ReduceLROnPlateau factor/patience; optimizer choice.
    """
    # Free graph/session state left over from previous trials.
    keras.backend.clear_session()
    n_layers = trial.suggest_int('n_layers', 1, 4)
    model = keras.Sequential()
    for i in range(n_layers):
        num_hidden = trial.suggest_int(f'n_units_l{i}', train.shape[1]-1, 400, log=True)
        model.add(keras.layers.LSTM(num_hidden, input_shape=(1, train.shape[1]-1),return_sequences=True,
                               activation=trial.suggest_categorical(f'activation{i}', ['relu', 'linear','swish','sigmoid'])))
        model.add(keras.layers.Dropout(rate = trial.suggest_float(f'dropout{i}', 0.0, 0.5)))

    model.add(keras.layers.Dense(1,activation=trial.suggest_categorical(f'finalact1', ['relu', 'linear','swish','sigmoid'])))
    val_ds = (xval_n,yval_n)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=trial.suggest_float('LRfactor', 0.0, 0.5),patience=trial.suggest_int('LRpatience', 5, 20),min_lr=1e-05,verbose=0)
    model.compile(loss='mse', optimizer=trial.suggest_categorical(f'optimizer', ['Adagrad','adam', 'sgd','RMSprop']))
    run_history = model.fit(xtrain_n,ytrain_n,validation_data=val_ds,epochs=50,callbacks=[reduce_lr],verbose=0)
    # Best (not final) epoch's validation loss is the trial score.
    return min(run_history.history['val_loss'])
In [20]:
# Run the hyperparameter search: up to 60 trials or 1800 s, whichever first.
study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=60, timeout=1800)
print("Number of finished trials: {}".format(len(study.trials)))
print("Best trial:")
trial = study.best_trial
print("  Value: {}".format(trial.value))
[I 2023-05-07 11:42:42,454] A new study created in memory with name: no-name-84c7a779-04b7-4f7d-9fde-77cfc93b0c59
[I 2023-05-07 11:43:01,699] Trial 0 finished with value: 0.0018795349169522524 and parameters: {'n_layers': 4, 'n_units_l0': 55, 'activation0': 'swish', 'dropout0': 0.1199177859354284, 'n_units_l1': 18, 'activation1': 'linear', 'dropout1': 0.32812843986256823, 'n_units_l2': 141, 'activation2': 'swish', 'dropout2': 0.2800801922292878, 'n_units_l3': 114, 'activation3': 'linear', 'dropout3': 0.3224510646946709, 'finalact1': 'linear', 'LRfactor': 0.3689363399796488, 'LRpatience': 5, 'optimizer': 'adam'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:43:21,236] Trial 1 finished with value: 0.00386535725556314 and parameters: {'n_layers': 4, 'n_units_l0': 13, 'activation0': 'swish', 'dropout0': 0.041892548476495484, 'n_units_l1': 173, 'activation1': 'sigmoid', 'dropout1': 0.013464276248870422, 'n_units_l2': 45, 'activation2': 'swish', 'dropout2': 0.48087277464623024, 'n_units_l3': 75, 'activation3': 'relu', 'dropout3': 0.47176496887664715, 'finalact1': 'swish', 'LRfactor': 0.2412775658285337, 'LRpatience': 11, 'optimizer': 'adam'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:43:29,969] Trial 2 finished with value: 0.5091080069541931 and parameters: {'n_layers': 2, 'n_units_l0': 25, 'activation0': 'swish', 'dropout0': 0.1957381486279629, 'n_units_l1': 33, 'activation1': 'relu', 'dropout1': 0.391796961219781, 'finalact1': 'linear', 'LRfactor': 0.3215048801793688, 'LRpatience': 15, 'optimizer': 'Adagrad'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:43:36,521] Trial 3 finished with value: 0.0019253452774137259 and parameters: {'n_layers': 1, 'n_units_l0': 63, 'activation0': 'relu', 'dropout0': 0.40208386749868197, 'finalact1': 'linear', 'LRfactor': 0.35958290232184387, 'LRpatience': 8, 'optimizer': 'adam'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:44:00,153] Trial 4 finished with value: 0.004966916516423225 and parameters: {'n_layers': 3, 'n_units_l0': 33, 'activation0': 'relu', 'dropout0': 0.2965857028410737, 'n_units_l1': 131, 'activation1': 'relu', 'dropout1': 0.1420765454747454, 'n_units_l2': 394, 'activation2': 'sigmoid', 'dropout2': 0.4695462617507553, 'finalact1': 'sigmoid', 'LRfactor': 0.2989030092895136, 'LRpatience': 13, 'optimizer': 'RMSprop'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:44:24,358] Trial 5 finished with value: 0.05426213890314102 and parameters: {'n_layers': 4, 'n_units_l0': 150, 'activation0': 'sigmoid', 'dropout0': 0.21155443483075564, 'n_units_l1': 140, 'activation1': 'linear', 'dropout1': 0.12148094946446503, 'n_units_l2': 279, 'activation2': 'swish', 'dropout2': 0.0844062625734962, 'n_units_l3': 25, 'activation3': 'sigmoid', 'dropout3': 0.3644667409293845, 'finalact1': 'swish', 'LRfactor': 0.05999948707633668, 'LRpatience': 6, 'optimizer': 'Adagrad'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:44:33,323] Trial 6 finished with value: 0.0022018232848495245 and parameters: {'n_layers': 2, 'n_units_l0': 49, 'activation0': 'linear', 'dropout0': 0.30389057579644685, 'n_units_l1': 22, 'activation1': 'swish', 'dropout1': 0.16551063541414834, 'finalact1': 'sigmoid', 'LRfactor': 0.23137536007942577, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:44:51,534] Trial 7 finished with value: 0.1836318075656891 and parameters: {'n_layers': 4, 'n_units_l0': 33, 'activation0': 'swish', 'dropout0': 0.21043429592300006, 'n_units_l1': 72, 'activation1': 'swish', 'dropout1': 0.2639025915788279, 'n_units_l2': 10, 'activation2': 'linear', 'dropout2': 0.38143139079799065, 'n_units_l3': 320, 'activation3': 'relu', 'dropout3': 0.03724301775794375, 'finalact1': 'relu', 'LRfactor': 0.3858600729902117, 'LRpatience': 11, 'optimizer': 'sgd'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:45:08,048] Trial 8 finished with value: 0.2815978527069092 and parameters: {'n_layers': 3, 'n_units_l0': 96, 'activation0': 'sigmoid', 'dropout0': 0.1379791507618925, 'n_units_l1': 28, 'activation1': 'linear', 'dropout1': 0.39241047261913925, 'n_units_l2': 248, 'activation2': 'relu', 'dropout2': 0.29332708325145607, 'finalact1': 'linear', 'LRfactor': 0.06209643155233463, 'LRpatience': 19, 'optimizer': 'Adagrad'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:45:33,312] Trial 9 finished with value: 0.0038534505292773247 and parameters: {'n_layers': 4, 'n_units_l0': 197, 'activation0': 'relu', 'dropout0': 0.2672468778058641, 'n_units_l1': 167, 'activation1': 'swish', 'dropout1': 0.14157829974518482, 'n_units_l2': 147, 'activation2': 'linear', 'dropout2': 0.496795065687438, 'n_units_l3': 76, 'activation3': 'sigmoid', 'dropout3': 0.12289759289754687, 'finalact1': 'sigmoid', 'LRfactor': 0.18536960131206442, 'LRpatience': 7, 'optimizer': 'adam'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:45:52,572] Trial 10 finished with value: 0.18370501697063446 and parameters: {'n_layers': 3, 'n_units_l0': 391, 'activation0': 'linear', 'dropout0': 0.0030276492440440284, 'n_units_l1': 10, 'activation1': 'linear', 'dropout1': 0.4999144872909504, 'n_units_l2': 104, 'activation2': 'swish', 'dropout2': 0.18807067654301723, 'finalact1': 'relu', 'LRfactor': 0.4962974216070737, 'LRpatience': 5, 'optimizer': 'sgd'}. Best is trial 0 with value: 0.0018795349169522524.
[I 2023-05-07 11:45:59,138] Trial 11 finished with value: 0.0017562878783792257 and parameters: {'n_layers': 1, 'n_units_l0': 65, 'activation0': 'relu', 'dropout0': 0.42564447148560464, 'finalact1': 'linear', 'LRfactor': 0.40117687248611966, 'LRpatience': 8, 'optimizer': 'adam'}. Best is trial 11 with value: 0.0017562878783792257.
[I 2023-05-07 11:46:06,356] Trial 12 finished with value: 0.0018830097978934646 and parameters: {'n_layers': 1, 'n_units_l0': 77, 'activation0': 'relu', 'dropout0': 0.47782987797035503, 'finalact1': 'linear', 'LRfactor': 0.4510483440778943, 'LRpatience': 9, 'optimizer': 'adam'}. Best is trial 11 with value: 0.0017562878783792257.
[I 2023-05-07 11:46:25,825] Trial 13 finished with value: 0.0018126964569091797 and parameters: {'n_layers': 2, 'n_units_l0': 98, 'activation0': 'swish', 'dropout0': 0.36279816192772774, 'n_units_l1': 370, 'activation1': 'sigmoid', 'dropout1': 0.28903083287933945, 'finalact1': 'linear', 'LRfactor': 0.3828631319866444, 'LRpatience': 9, 'optimizer': 'adam'}. Best is trial 11 with value: 0.0017562878783792257.
[I 2023-05-07 11:46:33,660] Trial 14 finished with value: 0.0016946560936048627 and parameters: {'n_layers': 1, 'n_units_l0': 112, 'activation0': 'relu', 'dropout0': 0.37473897556218155, 'finalact1': 'linear', 'LRfactor': 0.43152022810349583, 'LRpatience': 10, 'optimizer': 'adam'}. Best is trial 14 with value: 0.0016946560936048627.
[I 2023-05-07 11:46:42,581] Trial 15 finished with value: 0.0013946568360552192 and parameters: {'n_layers': 1, 'n_units_l0': 144, 'activation0': 'relu', 'dropout0': 0.48666910047110246, 'finalact1': 'linear', 'LRfactor': 0.44872600173326666, 'LRpatience': 12, 'optimizer': 'adam'}. Best is trial 15 with value: 0.0013946568360552192.
[I 2023-05-07 11:46:52,420] Trial 16 finished with value: 0.0014057792723178864 and parameters: {'n_layers': 1, 'n_units_l0': 157, 'activation0': 'relu', 'dropout0': 0.49368199171958704, 'finalact1': 'linear', 'LRfactor': 0.4653197774814695, 'LRpatience': 15, 'optimizer': 'adam'}. Best is trial 15 with value: 0.0013946568360552192.
[I 2023-05-07 11:47:04,834] Trial 17 finished with value: 0.13004377484321594 and parameters: {'n_layers': 2, 'n_units_l0': 193, 'activation0': 'relu', 'dropout0': 0.4958733633635181, 'n_units_l1': 48, 'activation1': 'relu', 'dropout1': 0.03671206176689096, 'finalact1': 'swish', 'LRfactor': 0.4885428960009874, 'LRpatience': 15, 'optimizer': 'sgd'}. Best is trial 15 with value: 0.0013946568360552192.
[I 2023-05-07 11:47:18,505] Trial 18 finished with value: 0.0011038457741960883 and parameters: {'n_layers': 1, 'n_units_l0': 298, 'activation0': 'relu', 'dropout0': 0.4633983737960401, 'finalact1': 'relu', 'LRfactor': 0.4479031287517271, 'LRpatience': 16, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.0011038457741960883.
[I 2023-05-07 11:47:32,469] Trial 19 finished with value: 0.0029311126563698053 and parameters: {'n_layers': 1, 'n_units_l0': 318, 'activation0': 'sigmoid', 'dropout0': 0.43955978832220455, 'finalact1': 'relu', 'LRfactor': 0.4272081297883733, 'LRpatience': 17, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.0011038457741960883.
[I 2023-05-07 11:47:48,093] Trial 20 finished with value: 0.00206091720610857 and parameters: {'n_layers': 2, 'n_units_l0': 297, 'activation0': 'linear', 'dropout0': 0.35592007410859955, 'n_units_l1': 10, 'activation1': 'sigmoid', 'dropout1': 0.23377159830729557, 'finalact1': 'relu', 'LRfactor': 0.4919724545437627, 'LRpatience': 20, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.0011038457741960883.
[I 2023-05-07 11:47:59,504] Trial 21 finished with value: 0.0011294621508568525 and parameters: {'n_layers': 1, 'n_units_l0': 221, 'activation0': 'relu', 'dropout0': 0.49654998090511693, 'finalact1': 'relu', 'LRfactor': 0.44785974525368555, 'LRpatience': 14, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.0011038457741960883.
[I 2023-05-07 11:48:11,043] Trial 22 finished with value: 0.0012250422732904553 and parameters: {'n_layers': 1, 'n_units_l0': 234, 'activation0': 'relu', 'dropout0': 0.4638429768366365, 'finalact1': 'relu', 'LRfactor': 0.4328409690469952, 'LRpatience': 13, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.0011038457741960883.
[I 2023-05-07 11:48:23,229] Trial 23 finished with value: 0.0011394271859899163 and parameters: {'n_layers': 1, 'n_units_l0': 247, 'activation0': 'relu', 'dropout0': 0.4479314655547021, 'finalact1': 'relu', 'LRfactor': 0.4220508356036114, 'LRpatience': 14, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.0011038457741960883.
[I 2023-05-07 11:48:49,282] Trial 24 finished with value: 0.001911777420900762 and parameters: {'n_layers': 2, 'n_units_l0': 252, 'activation0': 'relu', 'dropout0': 0.4403858184132113, 'n_units_l1': 393, 'activation1': 'sigmoid', 'dropout1': 0.48934615572683027, 'finalact1': 'relu', 'LRfactor': 0.33016960773860987, 'LRpatience': 16, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.0011038457741960883.
[I 2023-05-07 11:49:04,817] Trial 25 finished with value: 0.001237346208654344 and parameters: {'n_layers': 1, 'n_units_l0': 378, 'activation0': 'relu', 'dropout0': 0.41449657211193897, 'finalact1': 'relu', 'LRfactor': 0.4086680597886499, 'LRpatience': 14, 'optimizer': 'RMSprop'}. Best is trial 18 with value: 0.0011038457741960883.
[I 2023-05-07 11:49:17,489] Trial 26 finished with value: 0.0010647089220583439 and parameters: {'n_layers': 1, 'n_units_l0': 266, 'activation0': 'relu', 'dropout0': 0.4549928693911923, 'finalact1': 'relu', 'LRfactor': 0.4492395819376679, 'LRpatience': 17, 'optimizer': 'RMSprop'}. Best is trial 26 with value: 0.0010647089220583439.
[I 2023-05-07 11:49:35,583] Trial 27 finished with value: 0.0017012341413646936 and parameters: {'n_layers': 2, 'n_units_l0': 308, 'activation0': 'relu', 'dropout0': 0.4979080143868245, 'n_units_l1': 66, 'activation1': 'swish', 'dropout1': 0.057566963943832705, 'finalact1': 'relu', 'LRfactor': 0.4626023803042737, 'LRpatience': 17, 'optimizer': 'RMSprop'}. Best is trial 26 with value: 0.0010647089220583439.
[I 2023-05-07 11:49:46,156] Trial 28 finished with value: 0.000993031426332891 and parameters: {'n_layers': 1, 'n_units_l0': 196, 'activation0': 'linear', 'dropout0': 0.3940256887459795, 'finalact1': 'relu', 'LRfactor': 0.34770745224563365, 'LRpatience': 17, 'optimizer': 'RMSprop'}. Best is trial 28 with value: 0.000993031426332891.
[I 2023-05-07 11:49:58,106] Trial 29 finished with value: 0.0013623634586110711 and parameters: {'n_layers': 2, 'n_units_l0': 156, 'activation0': 'linear', 'dropout0': 0.400261667527055, 'n_units_l1': 16, 'activation1': 'relu', 'dropout1': 0.21315712287224176, 'finalact1': 'relu', 'LRfactor': 0.3669231167194035, 'LRpatience': 20, 'optimizer': 'RMSprop'}. Best is trial 28 with value: 0.000993031426332891.
[I 2023-05-07 11:50:13,544] Trial 30 finished with value: 0.0009315931238234043 and parameters: {'n_layers': 1, 'n_units_l0': 392, 'activation0': 'linear', 'dropout0': 0.3445111153463455, 'finalact1': 'relu', 'LRfactor': 0.34937936318797136, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 30 with value: 0.0009315931238234043.
[I 2023-05-07 11:50:29,263] Trial 31 finished with value: 0.0009470797376707196 and parameters: {'n_layers': 1, 'n_units_l0': 391, 'activation0': 'linear', 'dropout0': 0.33563063360579576, 'finalact1': 'relu', 'LRfactor': 0.3617957586456325, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 30 with value: 0.0009315931238234043.
[I 2023-05-07 11:50:44,527] Trial 32 finished with value: 0.0010182333644479513 and parameters: {'n_layers': 1, 'n_units_l0': 377, 'activation0': 'linear', 'dropout0': 0.3364526838123031, 'finalact1': 'relu', 'LRfactor': 0.3503337825333795, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 30 with value: 0.0009315931238234043.
[I 2023-05-07 11:50:59,929] Trial 33 finished with value: 0.0008834060863591731 and parameters: {'n_layers': 1, 'n_units_l0': 395, 'activation0': 'linear', 'dropout0': 0.33487494998617784, 'finalact1': 'relu', 'LRfactor': 0.32634955159434603, 'LRpatience': 19, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:51:14,933] Trial 34 finished with value: 0.00093073770403862 and parameters: {'n_layers': 1, 'n_units_l0': 372, 'activation0': 'linear', 'dropout0': 0.3215412426297781, 'finalact1': 'swish', 'LRfactor': 0.30366317433128714, 'LRpatience': 19, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:51:32,546] Trial 35 finished with value: 0.1834375560283661 and parameters: {'n_layers': 2, 'n_units_l0': 378, 'activation0': 'linear', 'dropout0': 0.33306621729695923, 'n_units_l1': 48, 'activation1': 'sigmoid', 'dropout1': 0.09555484816212478, 'finalact1': 'swish', 'LRfactor': 0.299717876213519, 'LRpatience': 19, 'optimizer': 'Adagrad'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:51:46,136] Trial 36 finished with value: 0.0009684238466434181 and parameters: {'n_layers': 1, 'n_units_l0': 308, 'activation0': 'linear', 'dropout0': 0.30682863309072356, 'finalact1': 'swish', 'LRfactor': 0.32011041825085096, 'LRpatience': 19, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:52:01,724] Trial 37 finished with value: 0.0009034103131853044 and parameters: {'n_layers': 1, 'n_units_l0': 395, 'activation0': 'linear', 'dropout0': 0.2615719591277655, 'finalact1': 'swish', 'LRfactor': 0.2693454892093074, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:52:20,858] Trial 38 finished with value: 0.17846520245075226 and parameters: {'n_layers': 3, 'n_units_l0': 320, 'activation0': 'linear', 'dropout0': 0.2624437137037499, 'n_units_l1': 96, 'activation1': 'linear', 'dropout1': 0.006335820092105926, 'n_units_l2': 45, 'activation2': 'sigmoid', 'dropout2': 0.04004745492697959, 'finalact1': 'swish', 'LRfactor': 0.2735878825492519, 'LRpatience': 20, 'optimizer': 'sgd'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:52:38,846] Trial 39 finished with value: 0.44231346249580383 and parameters: {'n_layers': 2, 'n_units_l0': 260, 'activation0': 'linear', 'dropout0': 0.28377678836325493, 'n_units_l1': 228, 'activation1': 'relu', 'dropout1': 0.18656387027524543, 'finalact1': 'swish', 'LRfactor': 0.2616941968965543, 'LRpatience': 19, 'optimizer': 'Adagrad'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:52:53,096] Trial 40 finished with value: 0.0010184922721236944 and parameters: {'n_layers': 1, 'n_units_l0': 340, 'activation0': 'linear', 'dropout0': 0.23169236171309443, 'finalact1': 'swish', 'LRfactor': 0.22765596337552366, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:53:08,503] Trial 41 finished with value: 0.000991162029094994 and parameters: {'n_layers': 1, 'n_units_l0': 390, 'activation0': 'linear', 'dropout0': 0.3159767177344394, 'finalact1': 'swish', 'LRfactor': 0.29986291817531974, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:53:23,779] Trial 42 finished with value: 0.0035328457597643137 and parameters: {'n_layers': 1, 'n_units_l0': 388, 'activation0': 'linear', 'dropout0': 0.2864599192410159, 'finalact1': 'sigmoid', 'LRfactor': 0.3306883272753759, 'LRpatience': 16, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:53:37,206] Trial 43 finished with value: 0.0009403415606357157 and parameters: {'n_layers': 1, 'n_units_l0': 279, 'activation0': 'linear', 'dropout0': 0.32621285214823603, 'finalact1': 'swish', 'LRfactor': 0.36008520211641115, 'LRpatience': 19, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:53:50,919] Trial 44 finished with value: 0.0010673595825210214 and parameters: {'n_layers': 1, 'n_units_l0': 275, 'activation0': 'linear', 'dropout0': 0.2504107051837358, 'finalact1': 'swish', 'LRfactor': 0.3922162215431196, 'LRpatience': 20, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:54:02,906] Trial 45 finished with value: 0.0013499222695827484 and parameters: {'n_layers': 1, 'n_units_l0': 227, 'activation0': 'swish', 'dropout0': 0.27410502719570695, 'finalact1': 'swish', 'LRfactor': 0.2859248115428441, 'LRpatience': 19, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:54:17,203] Trial 46 finished with value: 0.12779170274734497 and parameters: {'n_layers': 1, 'n_units_l0': 342, 'activation0': 'sigmoid', 'dropout0': 0.31541010849616363, 'finalact1': 'swish', 'LRfactor': 0.23696408295524934, 'LRpatience': 19, 'optimizer': 'Adagrad'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:54:31,477] Trial 47 finished with value: 0.16321918368339539 and parameters: {'n_layers': 2, 'n_units_l0': 190, 'activation0': 'linear', 'dropout0': 0.29224736166908766, 'n_units_l1': 94, 'activation1': 'swish', 'dropout1': 0.08967139318751416, 'finalact1': 'sigmoid', 'LRfactor': 0.3762446005440564, 'LRpatience': 17, 'optimizer': 'sgd'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:54:50,824] Trial 48 finished with value: 0.001866529812105 and parameters: {'n_layers': 3, 'n_units_l0': 277, 'activation0': 'linear', 'dropout0': 0.36989942829672046, 'n_units_l1': 41, 'activation1': 'linear', 'dropout1': 0.23240014376448712, 'n_units_l2': 18, 'activation2': 'relu', 'dropout2': 0.16129762880652573, 'finalact1': 'swish', 'LRfactor': 0.3032250428840391, 'LRpatience': 20, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:55:04,452] Trial 49 finished with value: 0.0009485161281190813 and parameters: {'n_layers': 1, 'n_units_l0': 333, 'activation0': 'linear', 'dropout0': 0.23150101402193615, 'finalact1': 'swish', 'LRfactor': 0.33724146956084566, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:55:29,676] Trial 50 finished with value: 0.18073926866054535 and parameters: {'n_layers': 4, 'n_units_l0': 208, 'activation0': 'swish', 'dropout0': 0.35047931690653394, 'n_units_l1': 241, 'activation1': 'swish', 'dropout1': 0.19917800096169447, 'n_units_l2': 26, 'activation2': 'sigmoid', 'dropout2': 0.016669032756104485, 'n_units_l3': 10, 'activation3': 'swish', 'dropout3': 0.18670295383521887, 'finalact1': 'swish', 'LRfactor': 0.31975267776029737, 'LRpatience': 16, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:55:45,177] Trial 51 finished with value: 0.0035052436869591475 and parameters: {'n_layers': 1, 'n_units_l0': 391, 'activation0': 'linear', 'dropout0': 0.3209345944006158, 'finalact1': 'sigmoid', 'LRfactor': 0.3627202376695652, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:55:59,617] Trial 52 finished with value: 0.0009162458591163158 and parameters: {'n_layers': 1, 'n_units_l0': 337, 'activation0': 'linear', 'dropout0': 0.3412109156541663, 'finalact1': 'swish', 'LRfactor': 0.362614682339554, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:56:12,508] Trial 53 finished with value: 0.0009549758397042751 and parameters: {'n_layers': 1, 'n_units_l0': 280, 'activation0': 'linear', 'dropout0': 0.3774123866843274, 'finalact1': 'swish', 'LRfactor': 0.38061793687596207, 'LRpatience': 19, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:56:26,808] Trial 54 finished with value: 0.0009356298251077533 and parameters: {'n_layers': 1, 'n_units_l0': 338, 'activation0': 'linear', 'dropout0': 0.29971265161797317, 'finalact1': 'swish', 'LRfactor': 0.34851261667023714, 'LRpatience': 20, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:56:40,170] Trial 55 finished with value: 0.060622915625572205 and parameters: {'n_layers': 1, 'n_units_l0': 340, 'activation0': 'sigmoid', 'dropout0': 0.3012875930434517, 'finalact1': 'swish', 'LRfactor': 0.3150445625975836, 'LRpatience': 20, 'optimizer': 'sgd'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:56:52,149] Trial 56 finished with value: 0.0009351918124593794 and parameters: {'n_layers': 1, 'n_units_l0': 239, 'activation0': 'linear', 'dropout0': 0.3519188423235551, 'finalact1': 'swish', 'LRfactor': 0.34144457831195163, 'LRpatience': 18, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:57:03,117] Trial 57 finished with value: 0.0018047972116619349 and parameters: {'n_layers': 1, 'n_units_l0': 226, 'activation0': 'linear', 'dropout0': 0.35001742262780705, 'finalact1': 'swish', 'LRfactor': 0.28190181023105343, 'LRpatience': 15, 'optimizer': 'Adagrad'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:57:15,438] Trial 58 finished with value: 0.000978425145149231 and parameters: {'n_layers': 1, 'n_units_l0': 249, 'activation0': 'linear', 'dropout0': 0.3532073728841493, 'finalact1': 'swish', 'LRfactor': 0.3353875369405577, 'LRpatience': 17, 'optimizer': 'RMSprop'}. Best is trial 33 with value: 0.0008834060863591731.
[I 2023-05-07 11:57:32,878] Trial 59 finished with value: 0.0016654889332130551 and parameters: {'n_layers': 2, 'n_units_l0': 299, 'activation0': 'swish', 'dropout0': 0.38381441389549076, 'n_units_l1': 34, 'activation1': 'sigmoid', 'dropout1': 0.06752333812417599, 'finalact1': 'linear', 'LRfactor': 0.40219072210466444, 'LRpatience': 16, 'optimizer': 'adam'}. Best is trial 33 with value: 0.0008834060863591731.
Number of finished trials: 60
Best trial:
  Value: 0.0008834060863591731
In [21]:
# Report the best trial's hyperparameters, one per line.
print("  Params: ")
for param_name, param_value in trial.params.items():
    print(f"    {param_name}: {param_value}")
  Params: 
    n_layers: 1
    n_units_l0: 395
    activation0: linear
    dropout0: 0.33487494998617784
    finalact1: relu
    LRfactor: 0.32634955159434603
    LRpatience: 19
    optimizer: RMSprop
In [22]:
#From Optuna same data
# Rebuild the Optuna best model (trial 33's params, lightly rounded) and
# retrain on the inner training split with validation, to inspect loss curves.
model =Sequential()
model.add(keras.layers.LSTM(395,input_shape=(1, train.shape[1]-1),return_sequences=True,activation=tf.keras.activations.linear))
model.add(keras.layers.Dropout(0.33))
model.add(keras.layers.Dense(1,activation=tf.keras.activations.relu))
# NOTE(review): reduce_lr monitors 'loss' here while the Optuna objective
# monitored 'val_loss' — confirm this difference is intentional.
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.32,patience=19,min_lr=1e-05,verbose=0)
early_stoping = EarlyStopping(monitor="val_loss",min_delta=0,patience=19,verbose=0,mode="auto",restore_best_weights=True)
model.compile(optimizer='RMSprop',loss='mse')
val_ds = (xval_n,yval_n)
history = model.fit(xtrain_n,ytrain_n,validation_data=val_ds,epochs=200,verbose=0,callbacks=[early_stoping,reduce_lr])
In [23]:
# Plot training vs. validation loss per epoch for the tuned model.
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch

loss_trace = go.Scatter(
    x=hist['epoch'], y=hist['loss'],
    mode='lines', name='Train_loss',
)
val_loss_trace = go.Scatter(
    x=hist['epoch'], y=hist['val_loss'],
    mode='lines', name='Val_loss',
)

fig = go.Figure(
    data=[loss_trace, val_loss_trace],
    layout=go.Layout(
        title='Optuna Loss best model History',
        xaxis={'title': 'Epoch'},
        yaxis={'title': 'Loss'},
    ),
)
fig.show()
In [24]:
# Refit the same Optuna-best architecture on the full training set
# (train + validation). With no held-out split here, early stopping
# watches the training loss instead of val_loss.
model = Sequential([
    keras.layers.LSTM(
        395,
        input_shape=(1, train.shape[1] - 1),
        return_sequences=True,
        activation=tf.keras.activations.linear,
    ),
    keras.layers.Dropout(0.33),
    keras.layers.Dense(1, activation=tf.keras.activations.relu),
])
model.compile(optimizer='RMSprop', loss='mse')

lr_scheduler = ReduceLROnPlateau(monitor='loss', factor=0.32, patience=19,
                                 min_lr=1e-05, verbose=0)
early_stopping = EarlyStopping(monitor="loss", min_delta=0, patience=19,
                               verbose=0, mode="auto", restore_best_weights=True)

history = model.fit(xtrainall_n, ytrainall_n, epochs=200, verbose=1,
                    callbacks=[early_stopping, lr_scheduler])
Epoch 1/200
63/63 [==============================] - 1s 5ms/step - loss: 0.0119 - lr: 0.0010
Epoch 2/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0017 - lr: 0.0010
Epoch 3/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0017 - lr: 0.0010
Epoch 4/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0016 - lr: 0.0010
Epoch 5/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0015 - lr: 0.0010
Epoch 6/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0014 - lr: 0.0010
Epoch 7/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0013 - lr: 0.0010
Epoch 8/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0014 - lr: 0.0010
Epoch 9/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0013 - lr: 0.0010
Epoch 10/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0013 - lr: 0.0010
Epoch 11/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0013 - lr: 0.0010
Epoch 12/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0013 - lr: 0.0010
Epoch 13/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0014 - lr: 0.0010
Epoch 14/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0013 - lr: 0.0010
Epoch 15/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0011 - lr: 0.0010
Epoch 16/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0012 - lr: 0.0010
Epoch 17/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0011 - lr: 0.0010
Epoch 18/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0012 - lr: 0.0010
Epoch 19/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0012 - lr: 0.0010
Epoch 20/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0010 - lr: 0.0010
Epoch 21/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0011 - lr: 0.0010
Epoch 22/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0010 - lr: 0.0010
Epoch 23/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0010 - lr: 0.0010
Epoch 24/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0010 - lr: 0.0010
Epoch 25/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0011 - lr: 0.0010
Epoch 26/200
63/63 [==============================] - 0s 4ms/step - loss: 9.5516e-04 - lr: 0.0010
Epoch 27/200
63/63 [==============================] - 0s 4ms/step - loss: 9.8052e-04 - lr: 0.0010
Epoch 28/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0011 - lr: 0.0010
Epoch 29/200
63/63 [==============================] - 0s 4ms/step - loss: 9.8774e-04 - lr: 0.0010
Epoch 30/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0010 - lr: 0.0010
Epoch 31/200
63/63 [==============================] - 0s 4ms/step - loss: 9.5273e-04 - lr: 0.0010
Epoch 32/200
63/63 [==============================] - 0s 4ms/step - loss: 9.7532e-04 - lr: 0.0010
Epoch 33/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0010 - lr: 0.0010
Epoch 34/200
63/63 [==============================] - 0s 4ms/step - loss: 0.0010 - lr: 0.0010
Epoch 35/200
63/63 [==============================] - 0s 4ms/step - loss: 8.7886e-04 - lr: 0.0010
Epoch 36/200
63/63 [==============================] - 0s 4ms/step - loss: 9.2788e-04 - lr: 0.0010
Epoch 37/200
63/63 [==============================] - 0s 4ms/step - loss: 9.4218e-04 - lr: 0.0010
Epoch 38/200
63/63 [==============================] - 0s 4ms/step - loss: 8.7703e-04 - lr: 0.0010
Epoch 39/200
63/63 [==============================] - 0s 4ms/step - loss: 9.0323e-04 - lr: 0.0010
Epoch 40/200
63/63 [==============================] - 0s 4ms/step - loss: 8.3723e-04 - lr: 0.0010
Epoch 41/200
63/63 [==============================] - 0s 4ms/step - loss: 8.7464e-04 - lr: 0.0010
Epoch 42/200
63/63 [==============================] - 0s 4ms/step - loss: 8.6585e-04 - lr: 0.0010
Epoch 43/200
63/63 [==============================] - 0s 4ms/step - loss: 8.8462e-04 - lr: 0.0010
Epoch 44/200
63/63 [==============================] - 0s 4ms/step - loss: 8.7924e-04 - lr: 0.0010
Epoch 45/200
63/63 [==============================] - 0s 4ms/step - loss: 9.0271e-04 - lr: 0.0010
Epoch 46/200
63/63 [==============================] - 0s 4ms/step - loss: 8.5350e-04 - lr: 0.0010
Epoch 47/200
63/63 [==============================] - 0s 4ms/step - loss: 7.2304e-04 - lr: 0.0010
Epoch 48/200
63/63 [==============================] - 0s 4ms/step - loss: 8.9083e-04 - lr: 0.0010
Epoch 49/200
63/63 [==============================] - 0s 4ms/step - loss: 7.7575e-04 - lr: 0.0010
Epoch 50/200
63/63 [==============================] - 0s 4ms/step - loss: 9.0407e-04 - lr: 0.0010
Epoch 51/200
63/63 [==============================] - 0s 4ms/step - loss: 8.9126e-04 - lr: 0.0010
Epoch 52/200
63/63 [==============================] - 0s 4ms/step - loss: 8.1057e-04 - lr: 0.0010
Epoch 53/200
63/63 [==============================] - 0s 4ms/step - loss: 8.3055e-04 - lr: 0.0010
Epoch 54/200
63/63 [==============================] - 0s 4ms/step - loss: 7.4824e-04 - lr: 0.0010
Epoch 55/200
63/63 [==============================] - 0s 4ms/step - loss: 7.7408e-04 - lr: 0.0010
Epoch 56/200
63/63 [==============================] - 0s 4ms/step - loss: 7.5293e-04 - lr: 0.0010
Epoch 57/200
63/63 [==============================] - 0s 4ms/step - loss: 7.7909e-04 - lr: 0.0010
Epoch 58/200
63/63 [==============================] - 0s 4ms/step - loss: 7.7699e-04 - lr: 0.0010
Epoch 59/200
63/63 [==============================] - 0s 4ms/step - loss: 8.0924e-04 - lr: 0.0010
Epoch 60/200
63/63 [==============================] - 0s 4ms/step - loss: 7.8519e-04 - lr: 0.0010
Epoch 61/200
63/63 [==============================] - 0s 4ms/step - loss: 8.1256e-04 - lr: 0.0010
Epoch 62/200
63/63 [==============================] - 0s 4ms/step - loss: 8.5315e-04 - lr: 0.0010
Epoch 63/200
63/63 [==============================] - 0s 5ms/step - loss: 7.7993e-04 - lr: 0.0010
Epoch 64/200
63/63 [==============================] - 0s 4ms/step - loss: 7.8999e-04 - lr: 0.0010
Epoch 65/200
63/63 [==============================] - 0s 4ms/step - loss: 7.1639e-04 - lr: 0.0010
Epoch 66/200
63/63 [==============================] - 0s 5ms/step - loss: 7.1440e-04 - lr: 0.0010
Epoch 67/200
63/63 [==============================] - 0s 5ms/step - loss: 5.2405e-04 - lr: 3.2000e-04
Epoch 68/200
63/63 [==============================] - 0s 4ms/step - loss: 4.8490e-04 - lr: 3.2000e-04
Epoch 69/200
63/63 [==============================] - 0s 4ms/step - loss: 5.1345e-04 - lr: 3.2000e-04
Epoch 70/200
63/63 [==============================] - 0s 4ms/step - loss: 5.5136e-04 - lr: 3.2000e-04
Epoch 71/200
63/63 [==============================] - 0s 4ms/step - loss: 4.9775e-04 - lr: 3.2000e-04
Epoch 72/200
63/63 [==============================] - 0s 4ms/step - loss: 4.9938e-04 - lr: 3.2000e-04
Epoch 73/200
63/63 [==============================] - 0s 4ms/step - loss: 5.0865e-04 - lr: 3.2000e-04
Epoch 74/200
63/63 [==============================] - 0s 4ms/step - loss: 5.0696e-04 - lr: 3.2000e-04
Epoch 75/200
63/63 [==============================] - 0s 4ms/step - loss: 5.1565e-04 - lr: 3.2000e-04
Epoch 76/200
63/63 [==============================] - 0s 4ms/step - loss: 5.3285e-04 - lr: 3.2000e-04
Epoch 77/200
63/63 [==============================] - 0s 4ms/step - loss: 4.8119e-04 - lr: 3.2000e-04
Epoch 78/200
63/63 [==============================] - 0s 4ms/step - loss: 5.0307e-04 - lr: 3.2000e-04
Epoch 79/200
63/63 [==============================] - 0s 4ms/step - loss: 4.8170e-04 - lr: 3.2000e-04
Epoch 80/200
63/63 [==============================] - 0s 4ms/step - loss: 4.8548e-04 - lr: 3.2000e-04
Epoch 81/200
63/63 [==============================] - 0s 4ms/step - loss: 4.9325e-04 - lr: 3.2000e-04
Epoch 82/200
63/63 [==============================] - 0s 4ms/step - loss: 4.4446e-04 - lr: 3.2000e-04
Epoch 83/200
63/63 [==============================] - 0s 4ms/step - loss: 4.8138e-04 - lr: 3.2000e-04
Epoch 84/200
63/63 [==============================] - 0s 4ms/step - loss: 4.8335e-04 - lr: 3.2000e-04
Epoch 85/200
63/63 [==============================] - 0s 4ms/step - loss: 4.7334e-04 - lr: 3.2000e-04
Epoch 86/200
63/63 [==============================] - 0s 4ms/step - loss: 4.3415e-04 - lr: 3.2000e-04
Epoch 87/200
63/63 [==============================] - 0s 4ms/step - loss: 4.2291e-04 - lr: 1.0240e-04
Epoch 88/200
63/63 [==============================] - 0s 4ms/step - loss: 4.6118e-04 - lr: 1.0240e-04
Epoch 89/200
63/63 [==============================] - 0s 4ms/step - loss: 4.6915e-04 - lr: 1.0240e-04
Epoch 90/200
63/63 [==============================] - 0s 4ms/step - loss: 4.1939e-04 - lr: 1.0240e-04
Epoch 91/200
63/63 [==============================] - 0s 4ms/step - loss: 4.6818e-04 - lr: 1.0240e-04
Epoch 92/200
63/63 [==============================] - 0s 4ms/step - loss: 4.5304e-04 - lr: 1.0240e-04
Epoch 93/200
63/63 [==============================] - 0s 4ms/step - loss: 4.1804e-04 - lr: 1.0240e-04
Epoch 94/200
63/63 [==============================] - 0s 5ms/step - loss: 4.5832e-04 - lr: 1.0240e-04
Epoch 95/200
63/63 [==============================] - 0s 4ms/step - loss: 4.3850e-04 - lr: 1.0240e-04
Epoch 96/200
63/63 [==============================] - 0s 4ms/step - loss: 4.4817e-04 - lr: 1.0240e-04
Epoch 97/200
63/63 [==============================] - 0s 4ms/step - loss: 4.5325e-04 - lr: 1.0240e-04
Epoch 98/200
63/63 [==============================] - 0s 4ms/step - loss: 4.2644e-04 - lr: 1.0240e-04
Epoch 99/200
63/63 [==============================] - 0s 4ms/step - loss: 4.6276e-04 - lr: 1.0240e-04
Epoch 100/200
63/63 [==============================] - 0s 5ms/step - loss: 4.6371e-04 - lr: 1.0240e-04
Epoch 101/200
63/63 [==============================] - 0s 5ms/step - loss: 4.5725e-04 - lr: 1.0240e-04
Epoch 102/200
63/63 [==============================] - 0s 5ms/step - loss: 4.2968e-04 - lr: 1.0240e-04
Epoch 103/200
63/63 [==============================] - 0s 5ms/step - loss: 4.4217e-04 - lr: 1.0240e-04
Epoch 104/200
63/63 [==============================] - 0s 5ms/step - loss: 4.5706e-04 - lr: 1.0240e-04
Epoch 105/200
63/63 [==============================] - 0s 5ms/step - loss: 3.8984e-04 - lr: 1.0240e-04
Epoch 106/200
63/63 [==============================] - 0s 4ms/step - loss: 4.3996e-04 - lr: 1.0240e-04
Epoch 107/200
63/63 [==============================] - 0s 5ms/step - loss: 4.3784e-04 - lr: 3.2768e-05
Epoch 108/200
63/63 [==============================] - 0s 4ms/step - loss: 4.5582e-04 - lr: 3.2768e-05
Epoch 109/200
63/63 [==============================] - 0s 5ms/step - loss: 4.5335e-04 - lr: 3.2768e-05
Epoch 110/200
63/63 [==============================] - 0s 4ms/step - loss: 4.4812e-04 - lr: 3.2768e-05
Epoch 111/200
63/63 [==============================] - 0s 5ms/step - loss: 4.2332e-04 - lr: 3.2768e-05
Epoch 112/200
63/63 [==============================] - 0s 4ms/step - loss: 4.4087e-04 - lr: 3.2768e-05
Epoch 113/200
63/63 [==============================] - 0s 4ms/step - loss: 4.3764e-04 - lr: 3.2768e-05
Epoch 114/200
63/63 [==============================] - 0s 4ms/step - loss: 4.5838e-04 - lr: 3.2768e-05
Epoch 115/200
63/63 [==============================] - 0s 4ms/step - loss: 4.2392e-04 - lr: 3.2768e-05
Epoch 116/200
63/63 [==============================] - 0s 4ms/step - loss: 4.3013e-04 - lr: 3.2768e-05
Epoch 117/200
63/63 [==============================] - 0s 4ms/step - loss: 4.6801e-04 - lr: 3.2768e-05
Epoch 118/200
63/63 [==============================] - 0s 4ms/step - loss: 4.4877e-04 - lr: 3.2768e-05
Epoch 119/200
63/63 [==============================] - 0s 4ms/step - loss: 4.3747e-04 - lr: 3.2768e-05
Epoch 120/200
63/63 [==============================] - 0s 4ms/step - loss: 3.9953e-04 - lr: 3.2768e-05
Epoch 121/200
63/63 [==============================] - 0s 4ms/step - loss: 4.1250e-04 - lr: 3.2768e-05
Epoch 122/200
63/63 [==============================] - 0s 4ms/step - loss: 4.5270e-04 - lr: 3.2768e-05
Epoch 123/200
63/63 [==============================] - 0s 4ms/step - loss: 4.3087e-04 - lr: 3.2768e-05
Epoch 124/200
63/63 [==============================] - 0s 5ms/step - loss: 4.3297e-04 - lr: 3.2768e-05
In [25]:
prediction=model.predict(xtest_n)
17/17 [==============================] - 0s 2ms/step
In [26]:
# Collapse the (n, 1, 1) prediction array into a flat list of scalars.
final_values = [p[0][0] for p in prediction]
In [27]:
df_final = pd.DataFrame(0, index=np.arange(len(test)), columns=test.columns)
In [28]:
df_final['BRK.B']=final_values
In [29]:
df_final = scalerall.inverse_transform(df_final)
In [30]:
# Pull the rescaled target column (index 0) out of the inverse-transformed array.
final_values_rescaled = [row[0] for row in df_final]
In [32]:
# Train / test / prediction overview for the tuned model on BRK.B.
trace1 = go.Scatter(
    x=pd.to_datetime(traindatesall), y=trainall.iloc[:, 0],
    mode='lines', name='Datatrain'
)
trace2 = go.Scatter(
    x=pd.to_datetime(testdates), y=test.iloc[:, 0],
    mode='lines', name='Datatest'
)
trace3 = go.Scatter(
    x=pd.to_datetime(testdates), y=final_values_rescaled,
    mode='lines', name='Prediction'
)

layout = go.Layout(
    # Fixed typo: title previously read 'BRBK'; the ticker is BRK.B.
    title='BRK.B Forecast',
    xaxis={'title': 'Date'},
    yaxis={'title': 'Close'}
)

fig = go.Figure(data=[trace1, trace2, trace3], layout=layout)
fig.show()
In [33]:
# Coefficient of determination between actual and predicted test prices.
from sklearn.metrics import r2_score, mean_squared_error
import math

r2 = r2_score(test.iloc[:, 0], final_values_rescaled)
r2
Out[33]:
0.9587073344270336
In [34]:
# Root-mean-squared error of the test-set forecast, in price units.
rmse = math.sqrt(mean_squared_error(test.iloc[:, 0], final_values_rescaled))
rmse
Out[34]:
5.264484594843866
In [36]:
# Actual vs. predicted scatter: points close to the diagonal mean a good fit.
fig, ax = plt.subplots()
ax.plot(test.iloc[:, 0], final_values_rescaled, 'bo')
plt.show()
In [37]:
# Fresh out-of-sample window (mid-Feb to early May 2023). Starting at
# 2023-02-14 leaves the 10 prior trading days needed to build the lag
# features for the first forecast date (shift_data uses period=10 below).
data_new=pdr.get_data_yahoo('BRK-B','2023-02-14','2023-05-05')
data_new=data_new[['Adj Close']]
# NOTE(review): the nested list creates MultiIndex columns; downstream code
# (data_new['BRK.B'].iloc[:, 0]) relies on that shape, so keep as-is.
data_new.columns=[['BRK.B']]
[*********************100%***********************]  1 of 1 completed
In [38]:
# Add the 10 lagged-price feature columns (shift_data is defined earlier in
# the notebook) and drop the leading rows whose lags are undefined.
shift_data(data_new,'BRK.B',10)
data_new=data_new.dropna()
In [39]:
# Scale the new window with the scaler fitted earlier (scalerall), then
# reshape the lag features to (samples, 1, n_lags) for the LSTM.
test_new = pd.DataFrame(scalerall.transform(data_new), columns=data_new.columns)
xtestnew=test_new.iloc[:,1:]
ytestnew=test_new.iloc[:,0]
xtestnew=xtestnew.to_numpy().reshape(-1,1,test_new.shape[1]-1)
# NOTE(review): ytestnew is never used after this cell — candidate for removal.
ytestnew=ytestnew.to_numpy().reshape(-1,1,1)
prediction_new=model.predict(xtestnew)
2/2 [==============================] - 0s 2ms/step
In [40]:
test_new_dates=data_new.index
In [41]:
# Rescale the new-window predictions and plot them against the realized
# prices, alongside the earlier train/test/prediction series for context.
final_values_new = [p[0][0] for p in prediction_new]

# Zero frame with the full column layout so inverse_transform applies.
df_final_new = pd.DataFrame(0, index=np.arange(len(test_new)), columns=test_new.columns)
df_final_new['BRK.B'] = final_values_new
df_final_new = scalerall.inverse_transform(df_final_new)
final_values_rescaled_new = [row[0] for row in df_final_new]

trace1 = go.Scatter(
    x=pd.to_datetime(test_new_dates), y=data_new['BRK.B'].iloc[:, 0],
    mode='lines', name='Real'
)
trace2 = go.Scatter(
    x=pd.to_datetime(test_new_dates), y=final_values_rescaled_new,
    mode='lines', name='Prediction_new'
)
trace3 = go.Scatter(
    x=pd.to_datetime(traindatesall), y=trainall.iloc[:, 0],
    mode='lines', name='Datatrain'
)
trace4 = go.Scatter(
    x=pd.to_datetime(testdates), y=test.iloc[:, 0],
    mode='lines', name='Datatest'
)
trace5 = go.Scatter(
    x=pd.to_datetime(testdates), y=final_values_rescaled,
    mode='lines', name='Prediction'
)

layout = go.Layout(
    # Fixed: title previously said 'S&P 500' (copy-paste leftover) — this
    # notebook forecasts BRK.B — and misspelled 'April'.
    title='BRK.B Forecast March - April',
    xaxis={'title': 'Date'},
    yaxis={'title': 'Close'}
)

fig = go.Figure(data=[trace1, trace2, trace3, trace4, trace5], layout=layout)
fig.show()
In [42]:
# RMSE of the out-of-sample (Feb–May 2023) forecast, in price units.
rmse = math.sqrt(mean_squared_error(data_new['BRK.B'].iloc[:, 0], final_values_rescaled_new))
rmse
Out[42]:
5.731638327199918
In [43]:
# Persist the architecture (JSON) and the learned weights (HDF5).
# NOTE(review): filenames say 'MSFT' but this model was trained on BRK.B —
# consider renaming; kept as-is since external scripts may load these paths.
model_json = model.to_json()
with open("model_MSFT.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("model_MSFT.h5")
In [44]:
import joblib
# Persist the fitted scaler so inference code can reproduce the exact scaling.
# NOTE(review): 'MSFT' in the filename does not match the BRK.B data either.
joblib.dump(scalerall, 'scaler_MSFT.gz')
Out[44]:
['scaler_MSFT.gz']
In [ ]: